static unsigned long target_pages;
/* We increase/decrease in batches which fit in a page */
-static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
+static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
/* VM /proc information for memory */
extern unsigned long totalram_pages;
blkif_request_t *req,
pending_req_t *pending_req)
{
- extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
+ extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct phys_req preq;
DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
operation == READ ? "read" : "write",
preq.sector_number,
- preq.sector_number + preq.nr_sects, preq.dev);
+ preq.sector_number + preq.nr_sects, preq.dev);
goto fail_flush;
}
unsigned char type; /* VDISK_xxx */
u32 pdevice; /* phys device that this vbd maps to */
struct block_device *bdev;
-};
+};
-struct backend_info;
+struct backend_info;
typedef struct blkif_st {
/* Unique identifier for this interface. */
/* The VBD attached to this interface. */
struct vbd vbd;
/* Back pointer to the backend_info. */
- struct backend_info *be;
+ struct backend_info *be;
/* Private fields. */
spinlock_t blk_ring_lock;
atomic_t refcnt;
blkif_sector_t sector_number;
};
-int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation);
+int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation);
void blkif_interface_init(void);
}
/* setup back pointer */
- be->blkif->be = be;
+ be->blkif->be = be;
err = xenbus_watch_path2(dev, dev->nodename, "physical-device",
&be->backend_watch, backend_changed);
}
/* We're potentially connected now */
- update_blkif_status(be->blkif);
+ update_blkif_status(be->blkif);
}
}
typedef unsigned int PEND_RING_IDX;
static inline int MASK_PEND_IDX(int i) {
- return (i & (MAX_PENDING_REQS-1));
+ return (i & (MAX_PENDING_REQS-1));
}
static inline unsigned int RTN_PEND_IDX(pending_req_t *req, int idx) {
if (!pending_reqs[mmap_alloc] || !pending_addrs[mmap_alloc]) {
kfree(pending_reqs[mmap_alloc]);
kfree(pending_addrs[mmap_alloc]);
- WPRINTK("%s: out of memory\n", __FUNCTION__);
+ WPRINTK("%s: out of memory\n", __FUNCTION__);
ret = -ENOMEM;
goto done;
}
unsigned long kvaddr, uvaddr;
struct page **map = info->vma->vm_private_data;
struct page *pg;
- int offset;
+ int offset;
uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, j);
kvaddr = MMAP_VADDR(mmap_start[mmap_idx].start,
>> PAGE_SHIFT;
map[offset] = NULL;
}
- fast_flush_area(pending_req, pending_idx, usr_idx, idx);
+ fast_flush_area(pending_req, pending_idx, usr_idx, idx);
make_response(blkif, pending_req->id, resp->operation,
resp->status);
info->idx_map[usr_idx] = INVALID_REQ;
"ring does not exist!\n");
print_dbug = 0; /*We only print this message once*/
}
- return 1;
+ return 1;
}
info = tapfds[blkif->dev_num];
blkif_request_t *req,
pending_req_t *pending_req)
{
- extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
+ extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
int op, operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
unsigned int nseg;
#define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args)
-struct backend_info;
+struct backend_info;
typedef struct blkif_st {
/* Unique identifier for this interface. */
blkif_back_ring_t blk_ring;
struct vm_struct *blk_ring_area;
/* Back pointer to the backend_info. */
- struct backend_info *be;
+ struct backend_info *be;
/* Private fields. */
spinlock_t blk_ring_lock;
atomic_t refcnt;
}
/* setup back pointer */
- be->blkif->be = be;
+ be->blkif->be = be;
be->blkif->sectors = 0;
/* set a watch on disk info, waiting for userspace to update details*/
/* Physical parameters of the comms window. */
grant_handle_t tx_shmem_handle;
- grant_ref_t tx_shmem_ref;
+ grant_ref_t tx_shmem_ref;
grant_handle_t rx_shmem_handle;
- grant_ref_t rx_shmem_ref;
+ grant_ref_t rx_shmem_ref;
unsigned int evtchn;
unsigned int irq;
/*
* Add following to poll() function in NAPI driver (Tigon3 is example):
* if ( xen_network_done() )
- * tg3_enable_ints(tp);
+ * tg3_enable_ints(tp);
*/
int xen_network_done(void)
{
static void net_rx_action(unsigned long unused)
{
- netif_t *netif = NULL;
+ netif_t *netif = NULL;
s8 status;
u16 id, irq, flags;
netif_rx_response_t *resp;
if (unlikely(txreq.size < ETH_HLEN)) {
DPRINTK("Bad packet size: %d\n", txreq.size);
netbk_tx_err(netif, &txreq, i);
- continue;
+ continue;
}
/* No crossing a page as the payload mustn't fragment. */
privcmd_mmapbatch_t m;
struct vm_area_struct *vma = NULL;
xen_pfn_t __user *p;
- unsigned long addr, mfn;
+ unsigned long addr, mfn;
int i;
if (copy_from_user(&m, udata, sizeof(m))) {
printk(KERN_WARNING
"xenbus: resume %s failed: %i\n",
dev->bus_id, err);
- return err;
+ return err;
}
}
return err;
}
- return 0;
+ return 0;
}
void xenbus_suspend(void)
for (i = mbi->mods_count-1; i >= 1; i--)
{
struct acm_policy_buffer *pol;
- char *_policy_start;
+ char *_policy_start;
unsigned long _policy_len;
#if defined(__i386__)
_policy_start = (char *)(initial_images_start + (mod[i].mod_start-mod[0].mod_start));
{
printk("%s: ERROR instantiating individual ssids for domain 0x%02x.\n",
__func__, subj->domain_id);
- acm_free_domain_ssid(ssid);
+ acm_free_domain_ssid(ssid);
put_domain(subj);
return ACM_INIT_SSID_ERROR;
}
/* init stats */
atomic_set(&(ste_bin_pol.ec_eval_count), 0);
- atomic_set(&(ste_bin_pol.ec_denied_count), 0);
+ atomic_set(&(ste_bin_pol.ec_denied_count), 0);
atomic_set(&(ste_bin_pol.ec_cachehit_count), 0);
atomic_set(&(ste_bin_pol.gt_eval_count), 0);
- atomic_set(&(ste_bin_pol.gt_denied_count), 0);
+ atomic_set(&(ste_bin_pol.gt_denied_count), 0);
atomic_set(&(ste_bin_pol.gt_cachehit_count), 0);
return ACM_OK;
}
ste_init_domain_ssid(void **ste_ssid, ssidref_t ssidref)
{
int i;
- struct ste_ssid *ste_ssidp = xmalloc(struct ste_ssid);
+ struct ste_ssid *ste_ssidp = xmalloc(struct ste_ssid);
traceprintk("%s.\n", __func__);
if (ste_ssidp == NULL)
sizeof(domaintype_t),
ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types);
- /* 2. now re-calculate sharing decisions based on running domains;
+ /* 2. now re-calculate sharing decisions based on running domains;
* this can fail if new policy is conflicting with sharing of running domains
* now: reject violating new policy; future: adjust sharing through revoking sharing */
if (ste_init_state(ste_buf, (domaintype_t *)ssidrefsbuf)) {
stats.ec_eval_count = htonl(atomic_read(&ste_bin_pol.ec_eval_count));
stats.gt_eval_count = htonl(atomic_read(&ste_bin_pol.gt_eval_count));
stats.ec_denied_count = htonl(atomic_read(&ste_bin_pol.ec_denied_count));
- stats.gt_denied_count = htonl(atomic_read(&ste_bin_pol.gt_denied_count));
+ stats.gt_denied_count = htonl(atomic_read(&ste_bin_pol.gt_denied_count));
stats.ec_cachehit_count = htonl(atomic_read(&ste_bin_pol.ec_cachehit_count));
stats.gt_cachehit_count = htonl(atomic_read(&ste_bin_pol.gt_cachehit_count));
-	if (buf_len < sizeof(struct acm_ste_stats_buffer))
+	if (buf_len < sizeof(struct acm_ste_stats_buffer))
return -ENOMEM;
memcpy(buf, &stats, sizeof(struct acm_ste_stats_buffer));
cache_result(subj, obj);
ret = ACM_ACCESS_PERMITTED;
} else {
- atomic_inc(&ste_bin_pol.ec_denied_count);
- ret = ACM_ACCESS_DENIED;
+ atomic_inc(&ste_bin_pol.ec_denied_count);
+ ret = ACM_ACCESS_DENIED;
}
out:
if (obj != NULL)
cache_result(subj, obj);
ret = ACM_ACCESS_PERMITTED;
} else {
- atomic_inc(&ste_bin_pol.ec_denied_count);
- ret = ACM_ACCESS_DENIED;
+ atomic_inc(&ste_bin_pol.ec_denied_count);
+ ret = ACM_ACCESS_DENIED;
}
out:
if (obj != NULL)
cache_result(subj, obj);
ret = ACM_ACCESS_PERMITTED;
} else {
- atomic_inc(&ste_bin_pol.gt_denied_count);
+ atomic_inc(&ste_bin_pol.gt_denied_count);
printkd("%s: ACCESS DENIED!\n", __func__);
- ret = ACM_ACCESS_DENIED;
+ ret = ACM_ACCESS_DENIED;
}
if (obj != NULL)
put_domain(obj);
cache_result(subj, obj);
ret = ACM_ACCESS_PERMITTED;
} else {
- atomic_inc(&ste_bin_pol.gt_denied_count);
- ret = ACM_ACCESS_DENIED;
+ atomic_inc(&ste_bin_pol.gt_denied_count);
+ ret = ACM_ACCESS_DENIED;
}
if (obj != NULL)
put_domain(obj);
int idx;
if (phys + size < 8 * 1024 * 1024)
- return __va(phys);
+ return __va(phys);
offset = phys & (PAGE_SIZE - 1);
mapped_size = PAGE_SIZE - offset;
/* 32b PAE */
if ( (( mfn_to_page(mfn)->u.inuse.type_info & PGT_va_mask )
>> PGT_va_shift) == 3 )
- return l2_table_offset(HYPERVISOR_VIRT_START);
+ return l2_table_offset(HYPERVISOR_VIRT_START);
else
return L2_PAGETABLE_ENTRIES;
#else
set_bit(X86_FEATURE_K8, c->x86_capability);
break;
case 6:
- set_bit(X86_FEATURE_K7, c->x86_capability);
+ set_bit(X86_FEATURE_K7, c->x86_capability);
break;
}
setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
/* PCR0 -- Performance Control */
/* Incrementor Margin 10 */
- setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04);
+ setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04);
setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
}
max = cpuid_eax(0x80860000);
cpu_rev = 0;
if ( max >= 0x80860001 ) {
- cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
+ cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
if (cpu_rev != 0x02000000) {
printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
(cpu_rev >> 24) & 0xff,
/*
 * DMI blacklist hook: disable ACPI on boards the table flags as broken.
 * An explicit acpi=force on the command line overrides the blacklist.
 * Always returns 0 so the DMI scan continues with other entries.
 */
static __init __attribute__((unused)) int dmi_disable_acpi(struct dmi_blacklist *d)
{
	if (acpi_force) {
		/* User override wins over the blacklist entry. */
		printk(KERN_NOTICE
		       "Warning: DMI blacklist says broken, but acpi forced\n");
		return 0;
	}
	printk(KERN_NOTICE "%s detected: acpi off\n",d->ident);
	disable_acpi();
	return 0;
}
/*
 * DMI blacklist hook: restrict ACPI to HT-enumeration only (acpi=ht)
 * for boards the table flags. An explicit acpi=force on the command
 * line overrides the blacklist. Always returns 0 so the scan continues.
 */
static __init __attribute__((unused)) int force_acpi_ht(struct dmi_blacklist *d)
{
	if (acpi_force) {
		/* User override wins over the blacklist entry. */
		printk(KERN_NOTICE
		       "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
		return 0;
	}
	printk(KERN_NOTICE "%s detected: force use of acpi=ht\n", d->ident);
	disable_acpi();
	acpi_ht = 1;
	return 0;
}
case DOM0_SHADOW_CONTROL:
{
- struct domain *d;
+ struct domain *d;
ret = -ESRCH;
d = find_domain_by_id(op->u.shadow_control.domain);
if ( d != NULL )
{
if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
{
- l1start = l1tab = (l1_pgentry_t *)mpt_alloc;
+ l1start = l1tab = (l1_pgentry_t *)mpt_alloc;
mpt_alloc += PAGE_SIZE;
*l2tab = l2e_from_paddr((unsigned long)l1start, L2_PROT);
l2tab++;
if ( !((unsigned long)++l2tab & (PAGE_SIZE - 1)) )
{
if ( !((unsigned long)++l3tab & (PAGE_SIZE - 1)) )
- l3start = l3tab = l4e_to_l3e(*++l4tab);
+ l3start = l3tab = l4e_to_l3e(*++l4tab);
l2start = l2tab = l3e_to_l2e(*l3tab);
}
l1start = l1tab = l2e_to_l1e(*l2tab);
dmi_bigsmp = 1;
else
dmi_check_system(bigsmp_dmi_table);
- return dmi_bigsmp;
+ return dmi_bigsmp;
}
struct genapic apic_bigsmp = {
unsigned char type;
unsigned char length;
unsigned char resv[6];
- unsigned long long start;
- unsigned long long size;
+ unsigned long long start;
+ unsigned long long size;
};
struct es7000_oem_table {
for (i = 0; !changed && apic_probe[i]; i++) {
if (apic_probe[i]->probe()) {
changed = 1;
- genapic = apic_probe[i];
+ genapic = apic_probe[i];
}
}
if (!changed)
printf("Huh? We got a GP Fault with an invalid IDTR!\n");
svm_dump_vmcb(__func__, vmcb);
svm_dump_regs(__func__, regs);
- svm_dump_inst(vmcb->rip);
+ svm_dump_inst(vmcb->rip);
__hvm_bug(regs);
}
{
case TYPE_MOV_TO_DR:
inst_len = __get_instruction_length(vmcb, INSTR_MOV2DR, buffer);
- v->arch.guest_context.debugreg[reg] = *reg_p;
+ v->arch.guest_context.debugreg[reg] = *reg_p;
break;
case TYPE_MOV_FROM_DR:
inst_len = __get_instruction_length(vmcb, INSTR_MOVDR2, buffer);
goto err_out;
}
- /* update the HSA for the current Core */
-#if 0
- set_hsa_to_guest( arch_svm );
-#endif
arch_svm->vmcb_pa = (u64) virt_to_maddr(arch_svm->vmcb);
if ((error = construct_vmcb_controls(arch_svm)))
}
default:
{
- printk(KERN_ERR "unknown bus type %d.\n",bus);
+ printk(KERN_ERR "unknown bus type %d.\n",bus);
irq = 0;
break;
}
unsigned int val[2];
uci->sig = uci->pf = uci->rev = uci->cksum = 0;
- uci->err = MC_NOTFOUND;
+ uci->err = MC_NOTFOUND;
uci->mc = NULL;
if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
int ret;
if (len < DEFAULT_UCODE_TOTALSIZE) {
- printk(KERN_ERR "microcode: not enough data\n");
+ printk(KERN_ERR "microcode: not enough data\n");
return -EINVAL;
}
#if CONFIG_PAGING_LEVELS >= 4
if ( unlikely(!l2_backptr(&vaddr, pgentry_ptr_to_slot(pl3e), type)) ||
unlikely(!get_page_from_l3e(nl3e, pfn, current->domain, vaddr)) )
- return 0;
+ return 0;
#else
vaddr = (((unsigned long)pl3e & ~PAGE_MASK) / sizeof(l3_pgentry_t))
<< L3_PAGETABLE_SHIFT;
goto out;
}
- perfc_incrc(calls_to_mmu_update);
+ perfc_incrc(calls_to_mmu_update);
perfc_addc(num_page_updates, count);
perfc_incr_histo(bpt_updates, count, PT_UPDATES);
* doing this ....
*/
-static int mpc_record;
+static int mpc_record;
static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
#ifdef CONFIG_X86_NUMAQ
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
unsigned int const nr_ctrs = model->num_counters;
- unsigned int const nr_ctrls = model->num_controls;
+ unsigned int const nr_ctrls = model->num_controls;
struct op_msr *counters = msrs->counters;
struct op_msr *controls = msrs->controls;
unsigned int i;
static void nmi_restore_registers(struct op_msrs * msrs)
{
unsigned int const nr_ctrs = model->num_counters;
- unsigned int const nr_ctrls = model->num_controls;
+ unsigned int const nr_ctrls = model->num_controls;
struct op_msr * counters = msrs->counters;
struct op_msr * controls = msrs->controls;
unsigned int i;
static void p4_fill_in_addresses(struct op_msrs * const msrs)
{
- unsigned int i;
+ unsigned int i;
unsigned int addr, stag;
setup_num_counters();
active_ready[ind] = 0;
active_domains[ind] = NULL;
activated--;
- put_domain(d);
+ put_domain(d);
if ( activated <= 0 )
adomains = 0;
if ( adomains >= MAX_OPROF_DOMAINS )
return -E2BIG;
- d = find_domain_by_id(domid);
+ d = find_domain_by_id(domid);
if ( d == NULL )
return -EINVAL;
if ( copy_from_guest(&passive, arg, 1) )
return -EFAULT;
- d = find_domain_by_id(passive.domain_id);
+ d = find_domain_by_id(passive.domain_id);
if ( d == NULL )
return -EINVAL;
/* maxcpus: maximum number of CPUs to activate. */
static unsigned int max_cpus = NR_CPUS;
-integer_param("maxcpus", max_cpus);
+integer_param("maxcpus", max_cpus);
/* opt_watchdog: If true, run a watchdog NMI on each processor. */
static int opt_watchdog = 0;
{
if ( entry_get_flags(shadow_pt[i]) & _PAGE_PRESENT )
put_shadow_ref(entry_get_pfn(shadow_pt[i]));
- shadow_pt[i] = entry_empty();
+ shadow_pt[i] = entry_empty();
continue;
}
{
if ( l4e_get_flags(shadow4[i]) & _PAGE_PRESENT )
put_shadow_ref(l4e_get_pfn(shadow4[i]));
- shadow4[i] = l4e_empty();
+ shadow4[i] = l4e_empty();
continue;
}
{
int need_flush = 0;
- need_flush |= resync_all(d, PGT_l1_shadow);
+ need_flush |= resync_all(d, PGT_l1_shadow);
if ( d->arch.ops->guest_paging_levels == PAGING_L2 &&
shadow_mode_translate(d) )
{
* Xen private mappings. Use the va_mask part.
*/
mfn_to_page(s2mfn)->u.inuse.type_info |=
- (unsigned long) 3 << PGT_score_shift;
+ (unsigned long) 3 << PGT_score_shift;
memset(spl2e, 0,
(L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)) * sizeof(l2_pgentry_t));
int new_modes = (mode & ~d->arch.shadow_mode);
if(!new_modes) /* Nothing to do - return success */
- return 0;
+ return 0;
// can't take anything away by calling this function.
ASSERT(!(d->arch.shadow_mode & ~mode));
audit_adjust_pgtables(d, -1, 1);
- for (list_ent = d->page_list.next; list_ent != &d->page_list;
+ for (list_ent = d->page_list.next; list_ent != &d->page_list;
list_ent = page->list.next) {
page = list_entry(list_ent, struct page_info, list);
}
if ( sc->pages > d->arch.shadow_dirty_bitmap_size )
- sc->pages = d->arch.shadow_dirty_bitmap_size;
+ sc->pages = d->arch.shadow_dirty_bitmap_size;
#define chunk (8*1024) /* Transfer and clear in 1kB chunks for L1 cache. */
for ( i = 0; i < sc->pages; i += chunk )
}
if ( sc->pages > d->arch.shadow_dirty_bitmap_size )
- sc->pages = d->arch.shadow_dirty_bitmap_size;
+ sc->pages = d->arch.shadow_dirty_bitmap_size;
if ( copy_to_guest(sc->dirty_bitmap,
d->arch.shadow_dirty_bitmap,
__shadow_get_l2e(entry->v, entry->va, &l2e);
if (l2e_get_flags(l2e) & _PAGE_PRESENT) {
- put_shadow_ref(l2e_get_pfn(l2e));
+ put_shadow_ref(l2e_get_pfn(l2e));
l2e = l2e_empty();
__shadow_set_l2e(entry->v, entry->va, l2e);
((SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT) -
DOMAIN_ENTRIES_PER_L2_PAGETABLE) * sizeof(l2_pgentry_t)) )
{
- for ( i = DOMAIN_ENTRIES_PER_L2_PAGETABLE;
+ for ( i = DOMAIN_ENTRIES_PER_L2_PAGETABLE;
i < (SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT);
i++ )
printk("+++ (%d) %lx %lx\n",i,
if ( shadow_mode_refcounts(d) )
{
- struct list_head *list_ent;
+ struct list_head *list_ent;
struct page_info *page;
/*
audit_adjust_pgtables(d, -1, 1);
- for (list_ent = d->page_list.next; list_ent != &d->page_list;
+ for (list_ent = d->page_list.next; list_ent != &d->page_list;
list_ent = page->list.next) {
page = list_entry(list_ent, struct page_info, list);
}
if ( sc->pages > d->arch.shadow_dirty_bitmap_size )
- sc->pages = d->arch.shadow_dirty_bitmap_size;
+ sc->pages = d->arch.shadow_dirty_bitmap_size;
#define chunk (8*1024) /* Transfer and clear in 1kB chunks for L1 cache. */
for ( i = 0; i < sc->pages; i += chunk )
}
if ( sc->pages > d->arch.shadow_dirty_bitmap_size )
- sc->pages = d->arch.shadow_dirty_bitmap_size;
+ sc->pages = d->arch.shadow_dirty_bitmap_size;
if ( copy_to_guest(sc->dirty_bitmap,
d->arch.shadow_dirty_bitmap,
#endif
#if CONFIG_PAGING_LEVELS >= 3
- l3t = map_domain_page(mfn);
+ l3t = map_domain_page(mfn);
#ifdef CONFIG_X86_PAE
l3t += (cr3 & 0xFE0UL) >> 3;
#endif
(32 + BITS_TO_LONGS(NR_CPUS)*sizeof(long)));
/* M2P table is mappable read-only by privileged domains. */
- for ( v = RDWR_MPT_VIRT_START;
+ for ( v = RDWR_MPT_VIRT_START;
v != RDWR_MPT_VIRT_END;
v += 1 << L2_PAGETABLE_SHIFT )
{
/* Skip the NMI and DF stacks. */
stack = &stack[2048];
- wrmsr(MSR_LSTAR, (unsigned long)stack, ((unsigned long)stack>>32));
+ wrmsr(MSR_LSTAR, (unsigned long)stack, ((unsigned long)stack>>32));
/* movq %rsp, saversp(%rip) */
stack[0] = 0x48;
/* Skip the long-mode entry trampoline. */
stack = &stack[26];
- wrmsr(MSR_CSTAR, (unsigned long)stack, ((unsigned long)stack>>32));
+ wrmsr(MSR_CSTAR, (unsigned long)stack, ((unsigned long)stack>>32));
/* movq %rsp, saversp(%rip) */
stack[0] = 0x48;
ret = read_console_ring(
op->u.readconsole.buffer,
&op->u.readconsole.count,
- op->u.readconsole.clear);
+ op->u.readconsole.clear);
if ( copy_to_guest(u_dom0_op, op, 1) )
ret = -EFAULT;
}
case DOM0_SETDOMAINMAXMEM:
{
- struct domain *d;
+ struct domain *d;
unsigned long new_max;
ret = -ESRCH;
case DOM0_SETDEBUGGING:
{
- struct domain *d;
+ struct domain *d;
ret = -ESRCH;
d = find_domain_by_id(op->u.setdebugging.domain);
if ( d != NULL )
int rc = 0;
struct vcpu_guest_context *c = NULL;
unsigned long vcpu = setvcpucontext->vcpu;
- struct vcpu *v;
+ struct vcpu *v;
if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
return -EINVAL;
irq_keyhandler_t *irq_handler;
} u;
unsigned int flags;
- char desc[STR_MAX];
-} key_table[KEY_MAX];
+ char desc[STR_MAX];
+} key_table[KEY_MAX];
#define KEYHANDLER_IRQ_CALLBACK 0x1
key_table[key].u.handler = handler;
key_table[key].flags = 0;
strncpy(key_table[key].desc, desc, STR_MAX);
- key_table[key].desc[STR_MAX-1] = '\0';
+ key_table[key].desc[STR_MAX-1] = '\0';
}
void register_irq_keyhandler(
key_table[key].u.irq_handler = handler;
key_table[key].flags = KEYHANDLER_IRQ_CALLBACK;
strncpy(key_table[key].desc, desc, STR_MAX);
- key_table[key].desc[STR_MAX-1] = '\0';
+ key_table[key].desc[STR_MAX-1] = '\0';
}
static void show_handlers(unsigned char key)
{
- int i;
+ int i;
printk("'%c' pressed -> showing installed handlers\n", key);
for ( i = 0; i < KEY_MAX; i++ )
if ( key_table[i].u.handler != NULL )
{
unsigned int cpu;
- printk("'%c' pressed -> dumping registers\n", key);
+ printk("'%c' pressed -> dumping registers\n", key);
/* Get local execution state out immediately, in case we get stuck. */
printk("\n*** Dumping CPU%d state: ***\n", smp_processor_id());
/*
 * Keyhandler for 'R': announce the keypress, then reboot the machine.
 * Does not return once machine_restart() succeeds.
 */
static void halt_machine(unsigned char key, struct cpu_user_regs *regs)
{
	printk("'%c' pressed -> rebooting machine\n", key);
	machine_restart(NULL);
}
static void cpuset_print(char *set, int size, cpumask_t mask)
char cpuset[100];
printk("'%c' pressed -> dumping domain info (now=0x%X:%08X)\n", key,
- (u32)(now>>32), (u32)now);
+ (u32)(now>>32), (u32)now);
read_lock(&domlist_lock);
open_softirq(KEYPRESS_SOFTIRQ, keypress_softirq);
register_irq_keyhandler(
- 'd', dump_registers, "dump registers");
+ 'd', dump_registers, "dump registers");
register_keyhandler(
'h', show_handlers, "show this message");
register_keyhandler(
register_keyhandler(
'r', dump_runq, "dump run queues");
register_irq_keyhandler(
- 'R', halt_machine, "reboot machine");
+ 'R', halt_machine, "reboot machine");
register_keyhandler(
't', read_clocks, "display multi-cpu clock info");
#ifdef PERF_COUNTERS
register_keyhandler(
- 'p', perfc_printall, "print performance counters");
+ 'p', perfc_printall, "print performance counters");
register_keyhandler(
- 'P', perfc_reset, "reset performance counters");
+ 'P', perfc_reset, "reset performance counters");
#endif
register_irq_keyhandler('%', do_debug_key, "Trap to xendbg");
static struct task_slice bvt_do_schedule(s_time_t now)
{
struct domain *d;
- struct vcpu *prev = current, *next = NULL, *next_prime, *ed;
+ struct vcpu *prev = current, *next = NULL, *next_prime, *ed;
int cpu = prev->processor;
s32 r_time; /* time for new dom to run */
u32 next_evt, next_prime_evt, min_avt;
/*Advaced Parameters*/
/*Latency Scaling*/
- s_time_t period_orig;
+ s_time_t period_orig;
s_time_t slice_orig;
s_time_t latency;
s_time_t unblock_abs;
/*scores for {util, block penalty}-weighted extratime distribution*/
- int score[2];
+ int score[2];
s_time_t short_block_lost_tot;
/*Statistics*/
struct list_head *list = EXTRALIST(d,i);
ASSERT(extraq_on(d,i));
PRINT(3, "Removing domain %i.%i from L%i extraq\n",
- d->domain->domain_id, d->vcpu_id, i);
+ d->domain->domain_id, d->vcpu_id, i);
list_del(list);
list->next = NULL;
ASSERT(!extraq_on(d, i));
PRINT(3, "Adding domain %i.%i (score= %i, short_pen= %"PRIi64")"
" to L%i extraq\n",
d->domain->domain_id, d->vcpu_id, EDOM_INFO(d)->score[i],
- EDOM_INFO(d)->short_block_lost_tot, i);
+ EDOM_INFO(d)->short_block_lost_tot, i);
/*
* Iterate through all elements to find our "hole" and on our way
printk("Scheduler: %s (%s)\n", ops.name, ops.opt_name);
SCHED_OP(dump_settings);
- printk("NOW=0x%08X%08X\n", (u32)(now>>32), (u32)now);
+ printk("NOW=0x%08X%08X\n", (u32)(now>>32), (u32)now);
for_each_online_cpu ( i )
{
{
struct timer *t;
struct timers *ts;
- unsigned long flags;
+ unsigned long flags;
s_time_t now = NOW();
int i, j;
printk("Dumping timer queues: NOW=0x%08X%08X\n",
- (u32)(now>>32), (u32)now);
+ (u32)(now>>32), (u32)now);
for_each_online_cpu( i )
{
u32 max_ssidrefs;
domaintype_t *ssidrefs; /* [max_ssidrefs][max_types] */
atomic_t ec_eval_count, gt_eval_count;
- atomic_t ec_denied_count, gt_denied_count;
+ atomic_t ec_denied_count, gt_denied_count;
atomic_t ec_cachehit_count, gt_cachehit_count;
};
extern int acpi_pci_disabled;
static inline void disable_acpi(void)
{
- acpi_disabled = 1;
+ acpi_disabled = 1;
acpi_ht = 0;
acpi_pci_disabled = 1;
acpi_noirq = 1;
struct mpc_config_processor;
struct genapic {
- char *name;
- int (*probe)(void);
+ char *name;
+ int (*probe)(void);
/* When one of the next two hooks returns 1 the genapic
is switched to this. Essentially they are additional probe
/* Interrupt delivery parameters ('physical' vs. 'logical flat'). */
int int_delivery_mode;
- int int_dest_mode;
+ int int_dest_mode;
void (*init_apic_ldr)(void);
void (*clustered_apic_check)(void);
cpumask_t (*target_cpus)(void);
unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
void (*send_IPI_mask)(cpumask_t mask, int vector);
-};
+};
#define APICFUNC(x) .x = x
* 1) TS bit in guest cr0
* 2) TSC offset in guest
*/
- void (*stts)(struct vcpu *v);
- void (*set_tsc_offset)(struct vcpu *v, u64 offset);
+ void (*stts)(struct vcpu *v);
+ void (*set_tsc_offset)(struct vcpu *v, u64 offset);
void (*init_ap_context)(struct vcpu_guest_context *ctxt,
int vcpuid, int trampoline_vector);
return 0; /* force to fail */
}
-extern void hvm_stts(struct vcpu *v);
-extern void hvm_set_guest_time(struct vcpu *v, u64 gtime);
-extern void hvm_do_resume(struct vcpu *v);
+extern void hvm_stts(struct vcpu *v);
+extern void hvm_set_guest_time(struct vcpu *v, u64 gtime);
+extern void hvm_do_resume(struct vcpu *v);
static inline void
hvm_init_ap_context(struct vcpu_guest_context *ctxt,
u64 ev: 1;
u64 resvd1: 19;
u64 v: 1;
- u64 errorcode:32;
+ u64 errorcode:32;
} fields;
} __attribute__ ((packed)) eventinj_t;
extern int start_vmx(void);
extern void vmcs_dump_vcpu(void);
extern void vmx_init_vmcs_config(void);
-extern void setup_vmcs_dump(void);
+extern void setup_vmcs_dump(void);
enum {
VMX_CPU_STATE_PAE_ENABLED=0,
/* Hook from generic ACPI tables.c */
static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
- unsigned long oem_addr;
+ unsigned long oem_addr;
if (!find_unisys_acpi_oem_table(&oem_addr)) {
if (es7000_check_dsdt())
return parse_unisys_oem((char *)oem_addr);
{
}
-int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid);
-int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
+int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid);
+int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
#endif
: "=a" (a__), "=d" (b__) \
: "c" (msr)); \
val = a__ | ((u64)b__<<32); \
-} while(0);
+} while(0);
#define wrmsr(msr,val1,val2) \
__asm__ __volatile__("wrmsr" \
/* Read the current value of control register CR0. */
static inline unsigned long read_cr0(void)
{
	/* Local renamed from '__cr0': leading double underscore is an
	 * implementation-reserved identifier in C. */
	unsigned long value;
	__asm__("mov %%cr0,%0\n\t" :"=r" (value));
	return value;
}
/* Load @val into control register CR0. */
static inline void write_cr0(unsigned long val)
{
	__asm__("mov %0,%%cr0": :"r" ((unsigned long)val));
}
/* Read the current value of control register CR4. */
static inline unsigned long read_cr4(void)
{
	/* Local renamed from '__cr4': leading double underscore is an
	 * implementation-reserved identifier in C. */
	unsigned long value;
	__asm__("mov %%cr4,%0\n\t" :"=r" (value));
	return value;
}
/* Load @val into control register CR4. */
static inline void write_cr4(unsigned long val)
{
	__asm__("mov %0,%%cr4": :"r" ((unsigned long)val));
}
a = a->next;
while ( a && (live < 9999) )
{
- live++;
+ live++;
if ( (a->gpfn_and_flags == 0) || (a->smfn == 0) )
{
printk("XXX live=%d gpfn+flags=%lx sp=%lx next=%p\n",
BUG();
}
ASSERT(a->smfn);
- a = a->next;
+ a = a->next;
}
ASSERT(live < 9999);
}
for ( a = d->arch.shadow_ht_free; a != NULL; a = a->next )
- free++;
+ free++;
if ( print )
printk("Xlive=%d free=%d\n", live, free);
if ( !(l1e_get_flags(gpte) & _PAGE_PRESENT) )
return 0;
- return l1e_get_paddr(gpte) + (gva & ~PAGE_MASK);
+ return l1e_get_paddr(gpte) + (gva & ~PAGE_MASK);
}
#endif
#endif
#else
case 4:
- return PAE_SHADOW_SELF_ENTRY;
+ return PAE_SHADOW_SELF_ENTRY;
#endif
#endif
default:
: "a" (c), "r" (count%BYTES_PER_LONG),
"0" (count/BYTES_PER_LONG), "1" (s)
: "memory" );
- return s;
+ return s;
}
/*
#include <asm/x86_32/uaccess.h>
#endif
-unsigned long copy_to_user(void *to, const void *from, unsigned len);
-unsigned long copy_from_user(void *to, const void *from, unsigned len);
+unsigned long copy_to_user(void *to, const void *from, unsigned len);
+unsigned long copy_from_user(void *to, const void *from, unsigned len);
/* Handles exceptions in both to and from, but doesn't do access_ok */
unsigned long __copy_to_user_ll(void *to, const void *from, unsigned n);
unsigned long __copy_from_user_ll(void *to, const void *from, unsigned n);
return ret;
case 8:
__get_user_size(*(u64*)to, from, 8, ret, 8);
- return ret;
+ return ret;
}
}
return __copy_from_user_ll(to, from, n);
#ifdef CRASH_DEBUG
/* value <-> char (de)serialzers for arch specific gdb backends */
-char hex2char(unsigned long x);
-int char2hex(unsigned char c);
-char str2hex(const char *str);
-unsigned long str2ulong(const char *str, unsigned long bytes);
+char hex2char(unsigned long x);
+int char2hex(unsigned char c);
+char str2hex(const char *str);
+unsigned long str2ulong(const char *str, unsigned long bytes);
struct gdb_context {
int serhnd;
const char *buf, int count, struct gdb_context *ctx);
void gdb_write_to_packet_hex(
unsigned long x, int int_size, struct gdb_context *ctx);
-void gdb_send_packet(struct gdb_context *ctx);
+void gdb_send_packet(struct gdb_context *ctx);
void gdb_send_reply(const char *buf, struct gdb_context *ctx);
/* gdb stub trap handler: entry point */
/******************************************************************************
* keyhandler.h
*
- * We keep an array of 'handlers' for each key code between 0 and 255;
+ * We keep an array of 'handlers' for each key code between 0 and 255;
* this is intended to allow very simple debugging routines (toggle
* debug flag, dump registers, reboot, etc) to be hooked in in a slightly
* nicer fashion than just editing the serial/keyboard drivers.
*/
typedef void keyhandler_t(unsigned char key);
extern void register_keyhandler(
- unsigned char key, keyhandler_t *handler, char *desc);
+ unsigned char key, keyhandler_t *handler, char *desc);
/*
* Register an IRQ callback function for key @key. The callback occurs
*/
typedef void irq_keyhandler_t(unsigned char key, struct cpu_user_regs *regs);
extern void register_irq_keyhandler(
- unsigned char key, irq_keyhandler_t *handler, char *desc);
+ unsigned char key, irq_keyhandler_t *handler, char *desc);
/* Inject a keypress into the key-handling subsystem. */
extern void handle_keypress(unsigned char key, struct cpu_user_regs *regs);
/**
 * list_del_init - unlink an entry from its list and reinitialise it.
 * @entry: element to remove; afterwards it is a valid, empty list head
 *         that may safely be re-added or tested with list_empty().
 *
 * Unlink first, then reinitialise — the order matters, since
 * INIT_LIST_HEAD overwrites the prev/next pointers __list_del reads.
 */
static __inline__ void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}
/**